Extend Xen's evtchn interface: add an EVTCHNOP_alloc_unbound operation that allocates an unbound local port awaiting a connection from a specified remote domain, make EVTCHNOP_bind_interdomain's port1/port2 parameters IN/OUT so existing (unbound) ports can be rebound, and have EVTCHNOP_close return an interdomain channel's remote end to the unbound state.
} u;
} xc_evtchn_status_t;
+int xc_evtchn_alloc_unbound(int xc_handle,
+ u32 dom,
+ int *port);
int xc_evtchn_bind_interdomain(int xc_handle,
u32 dom1, /* may be DOMID_SELF */
u32 dom2, /* may be DOMID_SELF */
}
+/*
+ * Allocate an unbound event-channel port that will await a connection
+ * from remote domain <dom> (issues EVTCHNOP_alloc_unbound).
+ *  xc_handle: open handle to the privileged command interface.
+ *  dom:       remote domain that will be allowed to bind to the new port.
+ *  port:      if non-NULL, receives the allocated port id on success.
+ * Returns 0 on success; otherwise the error value from do_evtchn_op().
+ */
+int xc_evtchn_alloc_unbound(int xc_handle,
+ u32 dom,
+ int *port)
+{
+ evtchn_op_t op;
+ int rc;
+
+ op.cmd = EVTCHNOP_alloc_unbound;
+ op.u.alloc_unbound.dom = (domid_t)dom;
+
+ /* On success the hypervisor fills in op.u.alloc_unbound.port. */
+ if ( (rc = do_evtchn_op(xc_handle, &op)) == 0 )
+ {
+ if ( port != NULL )
+ *port = op.u.alloc_unbound.port;
+ }
+
+ return rc;
+}
+
+
int xc_evtchn_bind_interdomain(int xc_handle,
u32 dom1,
u32 dom2,
int rc;
op.cmd = EVTCHNOP_bind_interdomain;
- op.u.bind_interdomain.dom1 = (domid_t)dom1;
- op.u.bind_interdomain.dom2 = (domid_t)dom2;
-
+ op.u.bind_interdomain.dom1 = (domid_t)dom1;
+ op.u.bind_interdomain.dom2 = (domid_t)dom2;
+ op.u.bind_interdomain.port1 = (port1 != NULL) ? *port1 : 0;
+ op.u.bind_interdomain.port2 = (port2 != NULL) ? *port2 : 0;
+
+
if ( (rc = do_evtchn_op(xc_handle, &op)) == 0 )
{
if ( port1 != NULL )
"warpu", warpu);
}
+/*
+ * Python wrapper: xc.evtchn_alloc_unbound(dom) -> port [int].
+ * Allocates an unbound local port awaiting a connection from remote
+ * domain <dom>, via xc_evtchn_alloc_unbound().
+ */
+static PyObject *pyxc_evtchn_alloc_unbound(PyObject *self,
+ PyObject *args,
+ PyObject *kwds)
+{
+ XcObject *xc = (XcObject *)self;
+
+ u32 dom;
+ int port;
+
+ /* 'dom' is the only (mandatory) keyword argument. */
+ static char *kwd_list[] = { "dom", NULL };
+
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i", kwd_list, &dom) )
+ return NULL;
+
+ /* Raise an errno-based exception on hypercall failure. */
+ if ( xc_evtchn_alloc_unbound(xc->xc_handle, dom, &port) != 0 )
+ return PyErr_SetFromErrno(xc_error);
+
+ return PyInt_FromLong(port);
+}
+
static PyObject *pyxc_evtchn_bind_interdomain(PyObject *self,
PyObject *args,
PyObject *kwds)
XcObject *xc = (XcObject *)self;
u32 dom1 = DOMID_SELF, dom2 = DOMID_SELF;
- int port1, port2;
+ int port1 = 0, port2 = 0;
- static char *kwd_list[] = { "dom1", "dom2", NULL };
+ static char *kwd_list[] = { "dom1", "dom2", "port1", "port2", NULL };
- if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list,
- &dom1, &dom2) )
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiii", kwd_list,
+ &dom1, &dom2, &port1, &port2) )
return NULL;
if ( xc_evtchn_bind_interdomain(xc->xc_handle, dom1,
"Returns [dict]:\n"
" slice [long]: Scheduler time slice.\n" },
+ { "evtchn_alloc_unbound",
+ (PyCFunction)pyxc_evtchn_alloc_unbound,
+ METH_VARARGS | METH_KEYWORDS, "\n"
+ "Allocate an unbound local port that will await a remote connection.\n"
+ " dom [int]: Remote domain to accept connections from.\n\n"
+ "Returns: [int] Unbound event-channel port.\n" },
+
{ "evtchn_bind_interdomain",
(PyCFunction)pyxc_evtchn_bind_interdomain,
METH_VARARGS | METH_KEYWORDS, "\n"
{ "evtchn_close",
(PyCFunction)pyxc_evtchn_close,
METH_VARARGS | METH_KEYWORDS, "\n"
- "Close an event channel.\n"
+ "Close an event channel. If interdomain, sets remote end to 'unbound'.\n"
" dom [int, SELF]: Dom-id of one endpoint of the channel.\n"
" port [int]: Port-id of one endpoint of the channel.\n\n"
"Returns: [int] 0 on success; -1 on error.\n" },
{
xu_port_object *xup;
u32 dom;
- int port1, port2;
+ int port1 = 0, port2 = 0;
if ( !PyArg_ParseTuple(args, "i", &dom) )
return NULL;
#define INIT_EVENT_CHANNELS 16
#define MAX_EVENT_CHANNELS 1024
+
static int get_free_port(struct domain *d)
{
int max, port;
return port;
}
+
+/*
+ * Allocate a free port in the calling domain and mark it ECS_UNBOUND,
+ * recording <alloc->dom> as the remote domain expected to bind to it.
+ * On success the chosen port is returned to the caller in alloc->port.
+ * Returns 0 on success, or the negative error from get_free_port().
+ * NOTE(review): alloc->dom is stored untranslated — presumably callers
+ * pass a concrete domid rather than DOMID_SELF; confirm with callers.
+ */
+static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
+{
+ struct domain *d = current;
+ int port;
+
+ spin_lock(&d->event_channel_lock);
+
+ /* Claim the port and record who may bind to it, under the lock. */
+ if ( (port = get_free_port(d)) >= 0 )
+ {
+ d->event_channel[port].state = ECS_UNBOUND;
+ d->event_channel[port].u.unbound.remote_domid = alloc->dom;
+ }
+
+ spin_unlock(&d->event_channel_lock);
+
+ /* Propagate get_free_port()'s error code if allocation failed. */
+ if ( port < 0 )
+ return port;
+
+ alloc->port = port;
+ return 0;
+}
+
+
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
+#define ERROR_EXIT(_errno) do { rc = (_errno); goto out; } while ( 0 )
struct domain *d1, *d2;
- int port1 = 0, port2 = 0;
+ int port1 = bind->port1, port2 = bind->port2;
domid_t dom1 = bind->dom1, dom2 = bind->dom2;
long rc = 0;
- if ( !IS_PRIV(current) )
+ if ( !IS_PRIV(current) && (dom1 != DOMID_SELF) )
return -EPERM;
+ if ( (port1 < 0) || (port2 < 0) )
+ return -EINVAL;
+
if ( dom1 == DOMID_SELF )
dom1 = current->id;
if ( dom2 == DOMID_SELF )
spin_lock(&d1->event_channel_lock);
}
- if ( (port1 = get_free_port(d1)) < 0 )
+ /* Obtain, or ensure that we already have, a valid <port1>. */
+ if ( port1 == 0 )
{
- rc = port1;
- goto out;
+ if ( (port1 = get_free_port(d1)) < 0 )
+ ERROR_EXIT(port1);
}
+ else if ( port1 >= d1->max_event_channel )
+ ERROR_EXIT(-EINVAL);
- /* 'Allocate' port1 before searching for a free port2. */
- d1->event_channel[port1].state = ECS_INTERDOMAIN;
+ /* Obtain, or ensure that we already have, a valid <port2>. */
+ if ( port2 == 0 )
+ {
+ /* Make port1 non-free while we allocate port2 (in case dom1==dom2). */
+ u16 tmp = d1->event_channel[port1].state;
+ d1->event_channel[port1].state = ECS_INTERDOMAIN;
+ port2 = get_free_port(d2);
+ d1->event_channel[port1].state = tmp;
+ if ( port2 < 0 )
+ ERROR_EXIT(port2);
+ }
+ else if ( port2 >= d2->max_event_channel )
+ ERROR_EXIT(-EINVAL);
- if ( (port2 = get_free_port(d2)) < 0 )
+ /* Validate <dom1,port1>'s current state. */
+ switch ( d1->event_channel[port1].state )
{
- d1->event_channel[port1].state = ECS_FREE;
- rc = port2;
+ case ECS_FREE:
+ break;
+
+ case ECS_UNBOUND:
+ if ( d1->event_channel[port1].u.unbound.remote_domid != dom2 )
+ ERROR_EXIT(-EINVAL);
+ break;
+
+ case ECS_INTERDOMAIN:
+ rc = ((d1->event_channel[port1].u.interdomain.remote_dom != d2) ||
+ (d1->event_channel[port1].u.interdomain.remote_port != port2)) ?
+ -EINVAL : 0;
goto out;
+
+ default:
+ ERROR_EXIT(-EINVAL);
+ }
+
+ /* Validate <dom2,port2>'s current state. */
+ switch ( d2->event_channel[port2].state )
+ {
+ case ECS_FREE:
+ break;
+
+ case ECS_UNBOUND:
+ if ( d2->event_channel[port2].u.unbound.remote_domid != dom1 )
+ ERROR_EXIT(-EINVAL);
+ break;
+
+ default:
+ ERROR_EXIT(-EINVAL);
}
+ /*
+ * Everything checked out okay -- bind <dom1,port1> to <dom2,port2>.
+ */
+
d1->event_channel[port1].u.interdomain.remote_dom = d2;
d1->event_channel[port1].u.interdomain.remote_port = (u16)port2;
-
+ d1->event_channel[port1].state = ECS_INTERDOMAIN;
+
d2->event_channel[port2].u.interdomain.remote_dom = d1;
d2->event_channel[port2].u.interdomain.remote_port = (u16)port1;
d2->event_channel[port2].state = ECS_INTERDOMAIN;
bind->port2 = port2;
return rc;
+#undef ERROR_EXIT
}
BUG();
chn2[port2].state = ECS_UNBOUND;
+ chn2[port2].u.unbound.remote_domid = d1->id;
break;
default:
break;
case ECS_UNBOUND:
status->status = EVTCHNSTAT_unbound;
+ status->u.unbound.dom = chn[port].u.unbound.remote_domid;
break;
case ECS_INTERDOMAIN:
status->status = EVTCHNSTAT_interdomain;
switch ( op.cmd )
{
+ case EVTCHNOP_alloc_unbound:
+ rc = evtchn_alloc_unbound(&op.u.alloc_unbound);
+ if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
+ rc = -EFAULT; /* Cleaning up here would be a mess! */
+ break;
+
case EVTCHNOP_bind_interdomain:
rc = evtchn_bind_interdomain(&op.u.bind_interdomain);
if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
#define __HYPERVISOR_IFS__EVENT_CHANNEL_H__
/*
- * EVTCHNOP_bind_interdomain: Open an event channel between <dom1> and <dom2>.
+ * EVTCHNOP_alloc_unbound: Allocate a fresh local port and prepare
+ * it for binding to <dom>.
+ */
+#define EVTCHNOP_alloc_unbound 6
+typedef struct {
+ /* IN parameters */
+ domid_t dom; /* 0 */
+ u16 __pad;
+ /* OUT parameters */
+ u32 port; /* 4 */
+} PACKED evtchn_alloc_unbound_t; /* 8 bytes */
+
+/*
+ * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
+ * <dom1> and <dom2>. Either <port1> or <port2> may be wildcarded by setting to
+ * zero. On successful return both <port1> and <port2> are filled in and
+ * <dom1,port1> is fully bound to <dom2,port2>.
+ *
* NOTES:
- * 1. <dom1> and/or <dom2> may be specified as DOMID_SELF.
- * 2. Only a sufficiently-privileged domain may create an event channel.
- * 3. <port1> and <port2> are only supplied if the op succeeds.
+ * 1. A wildcarded port is allocated from the relevant domain's free list
+ * (i.e., some port that was previously EVTCHNSTAT_closed). However, if the
+ * remote port pair is already fully bound then a port is not allocated,
+ * and instead the existing local port is returned to the caller.
+ * 2. If the caller is unprivileged then <dom1> must be DOMID_SELF.
+ * 3. If the caller is unprivileged and <dom2,port2> is EVTCHNSTAT_closed
+ * then <dom2> must be DOMID_SELF.
+ * 4. If either port is already bound then it must be bound to the other
+ * specified domain and port (if not wildcarded).
+ * 5. If either port is awaiting binding (EVTCHNSTAT_unbound) then it must
+ * be awaiting binding to the other domain, and the other port pair must
+ * be closed or unbound.
*/
#define EVTCHNOP_bind_interdomain 0
typedef struct {
/* IN parameters. */
domid_t dom1, dom2; /* 0, 2 */
- /* OUT parameters. */
+ /* IN/OUT parameters. */
u32 port1, port2; /* 4, 8 */
} PACKED evtchn_bind_interdomain_t; /* 12 bytes */
/*
* EVTCHNOP_close: Close the communication channel which has an endpoint at
- * <dom, port>.
+ * <dom, port>. If the channel is interdomain then the remote end is placed in
+ * the unbound state (EVTCHNSTAT_unbound), awaiting a new connection.
* NOTES:
* 1. <dom> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may close an event channel
u16 __pad;
u32 port; /* 4 */
/* OUT parameters */
-#define EVTCHNSTAT_closed 0 /* Chennel is not in use. */
-#define EVTCHNSTAT_unbound 1 /* Channel is not bound to a source. */
+#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
+#define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/
#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
u32 status; /* 8 */
union { /* 12 */
+ struct {
+ domid_t dom; /* 12 */
+ } PACKED unbound; /* EVTCHNSTAT_unbound */
struct {
domid_t dom; /* 12 */
u16 __pad;
u32 cmd; /* EVTCHNOP_* */ /* 0 */
u32 __reserved; /* 4 */
union { /* 8 */
+ evtchn_alloc_unbound_t alloc_unbound;
evtchn_bind_interdomain_t bind_interdomain;
evtchn_bind_virq_t bind_virq;
evtchn_bind_pirq_t bind_pirq;